In [1]:
import matplotlib as mpl
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
from scipy.misc import logsumexp
from scipy.special import erf
from scipy.stats import maxwell
import astropy.coordinates as coord
import gary.coordinates as gc
import emcee
import triangle
In [213]:
def ln_normal(x, mu, var):
    return -0.5*np.log(2*np.pi) - 0.5*np.log(var) - 0.5 * (x-mu)**2 / var
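
A quick sanity check, added here and not part of the original notebook: ln_normal takes a variance, so it should agree with scipy.stats.norm.logpdf once the variance is converted to a standard deviation.

In [ ]:
# Added check: ln_normal(x, mu, var) vs. scipy's Gaussian log-pdf.
# Note that norm.logpdf takes a standard deviation (scale), not a variance.
from scipy.stats import norm
x_test = np.linspace(-3., 3., 7)
np.allclose(ln_normal(x_test, 1., 4.), norm.logpdf(x_test, loc=1., scale=2.))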
In [233]:
# f_rrmg = (f_rr * N_rr) / (f_mg * N_mg)
def ln_likelihood_components(v):
    # per-star log-likelihood under each component: (halo, TriAnd)
    V1 = halo_sigma**2
    V2 = triand_sigma**2
    term1 = ln_normal(v, 0., V1)
    term2 = ln_normal(v, triand_v0, V2)
    return np.array([term1, term2])

def ln_likelihood(p, mg_v, rr_v):
    f_mg, f_rr = p

    # M giants
    ll_mg = ln_likelihood_components(mg_v)
    ll_mg[0] += np.log(1 - f_mg)
    ll_mg[1] += np.log(f_mg)
    ll1 = np.sum(np.logaddexp(*ll_mg))

    # RR Lyrae
    ll_rr = ln_likelihood_components(rr_v)
    ll_rr[0] += np.log(1 - f_rr)
    ll_rr[1] += np.log(f_rr)
    ll2 = np.sum(np.logaddexp(*ll_rr))

    return ll1 + ll2

def ln_prior(p):
    f_mg, f_rr = p
    if f_mg > 1. or f_mg < 0.:
        return -np.inf
    if f_rr > 1. or f_rr < 0.:
        return -np.inf
    return 0.

def ln_posterior(p, *args):
    lnp = ln_prior(p)
    if np.isinf(lnp):
        return -np.inf
    return lnp + ln_likelihood(p, *args)
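
As an aside (added, not in the original notebook), the per-tracer mixture term can also be written with an explicit logsumexp over log-weights, which generalizes to more than two components; this sketch assumes the logsumexp imported at the top accepts an axis argument, and it should match the np.logaddexp form above.

In [ ]:
# Added sketch: the mixture log-likelihood for one tracer, written with an
# explicit logsumexp over weighted components. Equivalent to the per-tracer
# terms in ln_likelihood when f is that tracer's TriAnd fraction.
def ln_mixture(v, f):
    terms = ln_likelihood_components(v)      # shape (2, n_stars): (halo, TriAnd)
    log_w = np.log(np.array([1 - f, f]))     # mixture weights for (halo, TriAnd)
    return np.sum(logsumexp(terms + log_w[:, None], axis=0))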
In [283]:
triand_sigma = 17.5
triand_v0 = 50.
halo_sigma = 105.
# np.random.seed(8675309) # looks good
# np.random.seed(42) # looks bad
np.random.seed(1234) # looks ??
N_mg = 100
true_f_mg = 0.65
N_halo_mg = int(N_mg - true_f_mg*N_mg)
mg_v = np.append(np.random.normal(0., halo_sigma, size=N_halo_mg),
                 np.random.normal(triand_v0, triand_sigma, size=N_mg-N_halo_mg))
true_f_mg = float(N_mg - N_halo_mg) / float(N_mg)
N_rr = 100
true_f_rr = 0.01
N_halo_rr = int(N_rr - true_f_rr*N_rr)
rr_v = np.append(np.random.normal(0., halo_sigma, size=N_halo_rr),
                 np.random.normal(triand_v0, triand_sigma, size=N_rr-N_halo_rr))
np.random.shuffle(rr_v)
true_f_rrmg = (true_f_rr * N_rr) / (true_f_mg * N_mg)
true_f_mg, true_f_rr, true_f_rrmg, float(N_rr - N_halo_rr) / float(N_mg - N_halo_mg)
Out[283]:
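
A quick check, added here rather than taken from the original: the posterior should be finite at the true fractions and -inf as soon as a mixing fraction leaves [0, 1].

In [ ]:
# Added sanity check on the prior bounds, using the simulated velocities above.
print(ln_posterior([true_f_mg, true_f_rr], mg_v, rr_v))   # finite
print(ln_posterior([true_f_mg, -0.1], mg_v, rr_v))        # -inf (f_rr < 0)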
In [284]:
fig,axes = plt.subplots(1,2,figsize=(10,4),sharex=True,sharey=True)
bins = np.linspace(-200,200,25)
axes[0].hist(mg_v, bins=bins);
axes[1].hist(rr_v, bins=bins);
# axes[1].hist(obs_rr_v, bins=bins)
fig.tight_layout()
In [285]:
fig,axes = plt.subplots(1, 1, figsize=(10,6))
for NN in [10, 20, 50, 100]:
    obs_rr_v = rr_v[:NN]

    fs = np.linspace(0., 1., 100)
    lls = np.zeros_like(fs)
    for i,f in enumerate(fs):
        lls[i] = ln_posterior([true_f_mg, f], mg_v, obs_rr_v)

    axes.plot(fs, np.exp(lls-lls.max()), marker=None, label=str(NN), lw=2.)
axes.legend()
axes.set_xlabel(r"$f_{\rm RR}$")
Out[285]:
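
The curves above are un-normalized slices of the posterior at the fixed true f_mg. As a rough cross-check (added, not in the original), one slice can be normalized on the grid with the trapezoid rule to read off an approximate 68% interval before running MCMC.

In [ ]:
# Added sketch: normalize the NN = 100 grid slice and pull out a crude
# 16th-84th percentile interval. The 100-point grid is coarse, so this is
# only meant as a sanity check on the MCMC results below.
lls_full = np.array([ln_posterior([true_f_mg, f], mg_v, rr_v) for f in fs])
pdf = np.exp(lls_full - lls_full.max())
pdf /= np.trapz(pdf, fs)
cdf = np.cumsum(pdf) * (fs[1] - fs[0])
np.interp([0.16, 0.84], cdf, fs)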
In [286]:
_cache = dict()
In [287]:
nwalkers = 32
# initialize walkers
p0 = np.zeros((nwalkers,2))
p0[:,0] = np.random.uniform(0.5, 0.8, size=nwalkers)
p0[:,1] = np.random.uniform(0, 0.2, size=nwalkers)
for NN in [10, 20, 50, 100]:
    obs_rr_v = rr_v[:NN]
    args = [mg_v, obs_rr_v]

    if NN not in _cache:
        sampler = emcee.EnsembleSampler(nwalkers=nwalkers, dim=2,
                                        lnpostfn=ln_posterior, args=args)

        # short burn-in, then reset and run the production chain
        pos,prob,state = sampler.run_mcmc(p0, N=128)
        sampler.reset()
        pos,prob,state = sampler.run_mcmc(pos, N=1024)

        _cache[NN] = sampler
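
A brief diagnostic, added here: the mean acceptance fraction of each cached sampler gives a rough sense of whether the ensemble mixed; values in roughly the 0.2-0.5 range are usually reasonable for emcee.

In [ ]:
# Added diagnostic: mean acceptance fraction per run.
for NN in sorted(_cache):
    print(NN, np.mean(_cache[NN].acceptance_fraction))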
In [288]:
fig,axes = plt.subplots(1, 2, figsize=(12,6), sharex=True, sharey=True)
bins = np.linspace(0, 1, 25)
for NN in [10, 20, 50, 100]:
    sampler = _cache[NN]
    axes[0].hist(sampler.flatchain[:,1], label=str(NN), lw=2., histtype='step', normed=True, bins=bins)

    f_rrmg = (sampler.flatchain[:,1] * N_rr) / (sampler.flatchain[:,0] * N_mg)
    axes[1].hist(f_rrmg, label=str(NN), lw=2., histtype='step', normed=True, bins=bins)
axes[0].legend()
axes[0].set_xlabel(r"$f_{\rm RR}$")
axes[1].set_xlabel(r"$f_{\rm RR:MG}$")
Out[288]:
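
To put numbers on the histograms above, a short summary cell (added) reports the median and 16th/84th percentiles of f_RR and f_RR:MG for each RR Lyrae sample size.

In [ ]:
# Added summary: percentile summaries of the flattened chains.
for NN in sorted(_cache):
    chain = _cache[NN].flatchain
    f_rrmg = (chain[:,1] * N_rr) / (chain[:,0] * N_mg)
    print(NN,
          np.percentile(chain[:,1], [16, 50, 84]),
          np.percentile(f_rrmg, [16, 50, 84]))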
In [ ]: